-0x00010000 CPU%(cpu)d %(tsc)d sched_add_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x00010001 CPU%(cpu)d %(tsc)d sched_rem_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x00010002 CPU%(cpu)d %(tsc)d domain_sleep [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x00010003 CPU%(cpu)d %(tsc)d domain_wake [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x00010004 CPU%(cpu)d %(tsc)d do_yield [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x00010005 CPU%(cpu)d %(tsc)d do_block [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
-0x00010006 CPU%(cpu)d %(tsc)d domain_shutdown [ domid = 0x%(1)08x, edomid = 0x%(2)08x, reason = 0x%(3)08x ]
-0x00010007 CPU%(cpu)d %(tsc)d sched_ctl
-0x00010008 CPU%(cpu)d %(tsc)d sched_adjdom [ domid = 0x%(1)08x ]
-0x00010009 CPU%(cpu)d %(tsc)d __enter_scheduler [ prev<domid:edomid> = 0x%(1)08x : 0x%(2)08x, next<domid:edomid> = 0x%(3)08x : 0x%(4)08x ]
-0x0001000A CPU%(cpu)d %(tsc)d s_timer_fn
-0x0001000B CPU%(cpu)d %(tsc)d t_timer_fn
-0x0001000C CPU%(cpu)d %(tsc)d dom_timer_fn
+0x00020001 CPU%(cpu)d %(tsc)d sched_add_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x00020002 CPU%(cpu)d %(tsc)d sched_rem_domain [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x00020003 CPU%(cpu)d %(tsc)d domain_sleep [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x00020004 CPU%(cpu)d %(tsc)d domain_wake [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x00020005 CPU%(cpu)d %(tsc)d do_yield [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x00020006 CPU%(cpu)d %(tsc)d do_block [ domid = 0x%(1)08x, edomid = 0x%(2)08x ]
+0x00020007 CPU%(cpu)d %(tsc)d domain_shutdown [ domid = 0x%(1)08x, edomid = 0x%(2)08x, reason = 0x%(3)08x ]
+0x00020008 CPU%(cpu)d %(tsc)d sched_ctl
+0x00020009 CPU%(cpu)d %(tsc)d sched_adjdom [ domid = 0x%(1)08x ]
+0x0002000A CPU%(cpu)d %(tsc)d __enter_scheduler [ prev<domid:edomid> = 0x%(1)08x : 0x%(2)08x, next<domid:edomid> = 0x%(3)08x : 0x%(4)08x ]
+0x0002000B CPU%(cpu)d %(tsc)d s_timer_fn
+0x0002000C CPU%(cpu)d %(tsc)d t_timer_fn
+0x0002000D CPU%(cpu)d %(tsc)d dom_timer_fn
-
-0x00020008 CPU%(cpu)d %(tsc)d enter: dom0_create_dom
-0x00030008 CPU%(cpu)d %(tsc)d leave: dom0_create_dom
-0x00020009 CPU%(cpu)d %(tsc)d enter: dom0_destroy_dom
-0x00030009 CPU%(cpu)d %(tsc)d leave: dom0_destroy_dom
-0x0002000A CPU%(cpu)d %(tsc)d enter: dom0_start_dom
-0x0003000A CPU%(cpu)d %(tsc)d leave: dom0_start_dom
-0x0002000B CPU%(cpu)d %(tsc)d enter: dom0_stop_dom
-0x0003000B CPU%(cpu)d %(tsc)d leave: dom0_stop_dom
-0x0002000C CPU%(cpu)d %(tsc)d enter: dom0_getinfo
-0x0003000C CPU%(cpu)d %(tsc)d leave: dom0_getinfo
-0x0002000D CPU%(cpu)d %(tsc)d enter: dom0_build
-0x0003000D CPU%(cpu)d %(tsc)d leave: dom0_build
-0x00020019 CPU%(cpu)d %(tsc)d enter: dom0_shadow_op
-0x00030019 CPU%(cpu)d %(tsc)d leave: dom0_shadow_op
-
-
-0x00040001 CPU%(cpu)d %(tsc)d VMX_VMEXIT [ domid = 0x%(1)08x, eip = 0x%(2)08x, reason = 0x%(3)08x ]
-0x00040002 CPU%(cpu)d %(tsc)d VMX_VECTOR [ domid = 0x%(1)08x, eip = 0x%(2)08x, vector = 0x%(3)08x ]
-0x00040003 CPU%(cpu)d %(tsc)d VMX_INT [ domid = 0x%(1)08x, trap = 0x%(2)08x, va = 0x%(3)08x ]
\ No newline at end of file
+0x00080001 CPU%(cpu)d %(tsc)d VMX_VMEXIT [ domid = 0x%(1)08x, eip = 0x%(2)08x, reason = 0x%(3)08x ]
+0x00080002 CPU%(cpu)d %(tsc)d VMX_VECTOR [ domid = 0x%(1)08x, eip = 0x%(2)08x, vector = 0x%(3)08x ]
+0x00080003 CPU%(cpu)d %(tsc)d VMX_INT [ domid = 0x%(1)08x, trap = 0x%(2)08x, va = 0x%(3)08x ]
#include <time.h>
#include <stdlib.h>
-#include <sys/mman.h>
#include <stdio.h>
-#include <sys/types.h>
+#include <sys/mman.h>
#include <sys/stat.h>
+#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
dom0_op_t op; /* dom0 op we'll build */
int xc_handle = xc_interface_open(); /* for accessing control interface */
- op.cmd = DOM0_GETTBUFS;
+ op.cmd = DOM0_TBUFCONTROL;
op.interface_version = DOM0_INTERFACE_VERSION;
+ op.u.tbufcontrol.op = DOM0_TBUF_GET_INFO;
ret = do_dom0_op(xc_handle, &op);
exit(EXIT_FAILURE);
}
- *mach_addr = op.u.gettbufs.mach_addr;
- *size = op.u.gettbufs.size;
+ *mach_addr = op.u.tbufcontrol.mach_addr;
+ *size = op.u.tbufcontrol.size;
}
/**
}
tbufs_mapped = xc_map_foreign_range(xc_handle, 0 /* Dom 0 ID */,
- size * num, PROT_READ,
- tbufs_mach >> PAGE_SHIFT);
+ size * num, PROT_READ,
+ tbufs_mach >> PAGE_SHIFT);
xc_interface_close(xc_handle);
exit(EXIT_FAILURE);
}
- return (struct t_buf *)tbufs_mapped;
+ return tbufs_mapped;
}
/* initialise pointers to the trace buffers - given the size of a trace
* buffer and the value of bufs_maped, we can easily calculate these */
for ( i = 0; i<num; i++ )
- user_ptrs[i] = (struct t_buf *)(
- (unsigned long)bufs_mapped + size * i);
+ user_ptrs[i] = (struct t_buf *)((unsigned long)bufs_mapped + size * i);
return user_ptrs;
}
exit(EXIT_FAILURE);
}
- for ( i = 0; i<num; i++ )
- data[i] = (struct t_rec *)(meta[i]->data - tbufs_mach
- + (unsigned long)tbufs_mapped);
+ for ( i = 0; i < num; i++ )
+ data[i] = (struct t_rec *)(meta[i]->rec_addr - tbufs_mach
+ + (unsigned long)tbufs_mapped);
return data;
}
}
for ( i = 0; i<num; i++ )
- tails[i] = bufs[i]->head;
+ tails[i] = atomic_read(&bufs[i]->rec_idx);
return tails;
}
get_tbufs(&tbufs_mach, &size);
tbufs_mapped = map_tbufs(tbufs_mach, num, size);
- size_in_recs = (size / sizeof(struct t_rec) )-1;
+ size_in_recs = (size - sizeof(struct t_buf)) / sizeof(struct t_rec);
/* build arrays of convenience ptrs */
meta = init_bufs_ptrs (tbufs_mapped, num, size);
while ( !interrupted )
{
for ( i = 0; ( i < num ) && !interrupted; i++ )
- while( cons[i] != meta[i]->head )
- {
- write_rec(i, data[i] + (cons[i] % size_in_recs), logfile);
- cons[i]++;
- }
+ while( cons[i] != atomic_read(&meta[i]->rec_idx) )
+ {
+ write_rec(i, data[i] + cons[i], logfile);
+ cons[i] = (cons[i] + 1) % size_in_recs;
+ }
nanosleep(&opts.poll_sleep, NULL);
}
/* ensure that if we get a signal, we'll do cleanup, then exit */
act.sa_handler = close_handler;
- sigaction(SIGHUP, &act, 0);
- sigaction(SIGTERM, &act, 0);
- sigaction(SIGINT, &act, 0);
+ act.sa_flags = 0;
+ sigemptyset(&act.sa_mask);
+ sigaction(SIGHUP, &act, NULL);
+ sigaction(SIGTERM, &act, NULL);
+ sigaction(SIGINT, &act, NULL);
ret = monitor_tbufs(logfile);
#include <xen/physdev.h>
#include <public/sched_ctl.h>
-#define TRC_DOM0OP_ENTER_BASE 0x00020000
-#define TRC_DOM0OP_LEAVE_BASE 0x00030000
-
extern unsigned int alloc_new_dom_mem(struct domain *, unsigned int);
extern long arch_do_dom0_op(dom0_op_t *op, dom0_op_t *u_dom0_op);
extern void arch_getdomaininfo_ctxt(
break;
#ifdef TRACE_BUFFER
- case DOM0_GETTBUFS:
+ case DOM0_TBUFCONTROL:
{
- ret = get_tb_info(&op->u.gettbufs);
+ ret = tb_control(&op->u.tbufcontrol);
copy_to_user(u_dom0_op, op, sizeof(*op));
}
break;
#define TIME_SLOP (s32)MICROSECS(50) /* allow time to slip a bit */
-/*
- * TODO MAW pull trace-related #defines out of here and into an auto-generated
- * header file later on!
- */
-#define TRC_SCHED_DOM_ADD 0x00010000
-#define TRC_SCHED_DOM_REM 0x00010001
-#define TRC_SCHED_SLEEP 0x00010002
-#define TRC_SCHED_WAKE 0x00010003
-#define TRC_SCHED_YIELD 0x00010004
-#define TRC_SCHED_BLOCK 0x00010005
-#define TRC_SCHED_SHUTDOWN 0x00010006
-#define TRC_SCHED_CTL 0x00010007
-#define TRC_SCHED_ADJDOM 0x00010008
-#define TRC_SCHED_SWITCH 0x00010009
-#define TRC_SCHED_S_TIMER_FN 0x0001000A
-#define TRC_SCHED_T_TIMER_FN 0x0001000B
-#define TRC_SCHED_DOM_TIMER_FN 0x0001000C
-
/* Various timer handlers. */
static void s_timer_fn(unsigned long unused);
static void t_timer_fn(unsigned long unused);
* Author: Mark Williamson, mark.a.williamson@intel.com
* Date: January 2004
*
+ * Copyright (C) 2005 Bin Ren
+ *
* The trace buffer code is designed to allow debugging traces of Xen to be
* generated on UP / SMP machines. Each trace entry is timestamped so that
* it's possible to reconstruct a chronological record of trace events.
/* a flag recording whether initialisation has been done */
int tb_init_done = 0;
+/* which CPUs tracing is enabled on */
+unsigned long tb_cpu_mask = (~0UL);
+
+/* which tracing events are enabled */
+u32 tb_event_mask = TRC_ALL;
/**
* init_trace_bufs - performs initialisation of the per-cpu trace buffers.
*
}
/* Share pages so that xentrace can map them. */
-
for ( i = 0; i < nr_pages; i++ )
- SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf+(i*PAGE_SIZE)), dom0);
+ SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf + i * PAGE_SIZE), dom0);
for ( i = 0; i < smp_num_cpus; i++ )
{
buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
- /* For use in Xen. */
- buf->vdata = (struct t_rec *)(buf+1);
- buf->head_ptr = buf->vdata;
-
- /* For use in user space. */
- buf->data = __pa(buf->vdata);
- buf->head = 0;
-
- /* For use in both. */
- buf->size = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf))
- / sizeof(struct t_rec);
+ _atomic_set(buf->rec_idx, 0);
+ buf->rec_num = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf))
+ / sizeof(struct t_rec);
+ buf->rec = (struct t_rec *)(buf + 1);
+ buf->rec_addr = __pa(buf->rec);
}
printk("Xen trace buffers: initialised\n");
}
/**
- * get_tb_info - get trace buffer details
- * @st: a pointer to a dom0_gettbufs_t to be filled out
- *
- * Called by the %DOM0_GETTBUFS dom0 op to fetch the machine address of the
- * trace buffers.
+ * tb_control - DOM0 operations on trace buffers.
+ * @tbc: a pointer to a dom0_tbufcontrol_t to be filled out
*/
-int get_tb_info(dom0_gettbufs_t *st)
+int tb_control(dom0_tbufcontrol_t *tbc)
{
- if ( tb_init_done )
- {
- st->mach_addr = __pa(t_bufs[0]);
- st->size = opt_tbuf_size * PAGE_SIZE;
-
- return 0;
- }
- else
+ static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+ int rc = 0;
+
+ if ( !tb_init_done )
+ return -EINVAL;
+
+ spin_lock(&lock);
+
+ switch ( tbc->op)
{
- st->mach_addr = 0;
- st->size = 0;
- return -ENODATA;
+ case DOM0_TBUF_GET_INFO:
+ tbc->cpu_mask = tb_cpu_mask;
+ tbc->evt_mask = tb_event_mask;
+ tbc->mach_addr = __pa(t_bufs[0]);
+ tbc->size = opt_tbuf_size * PAGE_SIZE;
+ break;
+ case DOM0_TBUF_SET_CPU_MASK:
+ tb_cpu_mask = tbc->cpu_mask;
+ break;
+ case DOM0_TBUF_SET_EVT_MASK:
+ tb_event_mask = tbc->evt_mask;
+ break;
+ default:
+ rc = -EINVAL;
}
+
+ spin_unlock(&lock);
+
+ return rc;
}
/*
return 0;
}
-#define TRC_VMX_VMEXIT 0x00040001
-#define TRC_VMX_VECTOR 0x00040002
-#define TRC_VMX_INT 0x00040003
-
#endif /* __ASM_X86_VMX_H__ */
} dom0_pincpudomain_t;
/* Get trace buffers machine base address */
-#define DOM0_GETTBUFS 21
+#define DOM0_TBUFCONTROL 21
typedef struct {
+ /* IN variables */
+#define DOM0_TBUF_GET_INFO 0
+#define DOM0_TBUF_SET_CPU_MASK 1
+#define DOM0_TBUF_SET_EVT_MASK 2
+ u8 op;
+ /* IN/OUT variables */
+ unsigned long cpu_mask;
+ u32 evt_mask;
/* OUT variables */
memory_t mach_addr;
u32 size;
-} dom0_gettbufs_t;
+} dom0_tbufcontrol_t;
/*
* Get physical information about the host machine
dom0_msr_t msr;
dom0_debug_t debug;
dom0_settime_t settime;
- dom0_readconsole_t readconsole;
+ dom0_readconsole_t readconsole;
dom0_pincpudomain_t pincpudomain;
- dom0_gettbufs_t gettbufs;
+ dom0_tbufcontrol_t tbufcontrol;
dom0_physinfo_t physinfo;
dom0_pcidev_access_t pcidev_access;
dom0_sched_id_t sched_id;
/******************************************************************************
- * trace.h
+ * include/public/trace.h
*
* Mark Williamson, (C) 2004 Intel Research Cambridge
+ * Copyright (C) 2005 Bin Ren
*/
#ifndef __XEN_PUBLIC_TRACE_H__
#define __XEN_PUBLIC_TRACE_H__
+#include <asm/atomic.h>
+
+/* Trace classes */
+#define TRC_GEN 0x00010000 /* General trace */
+#define TRC_SCHED 0x00020000 /* Xen Scheduler trace */
+#define TRC_DOM0OP 0x00040000 /* Xen DOM0 operation trace */
+#define TRC_VMX 0x00080000 /* Xen VMX trace */
+#define TRC_ALL 0xffff0000
+
+/* Trace events per class */
+
+#define TRC_SCHED_DOM_ADD (TRC_SCHED + 1)
+#define TRC_SCHED_DOM_REM (TRC_SCHED + 2)
+#define TRC_SCHED_SLEEP (TRC_SCHED + 3)
+#define TRC_SCHED_WAKE (TRC_SCHED + 4)
+#define TRC_SCHED_YIELD (TRC_SCHED + 5)
+#define TRC_SCHED_BLOCK (TRC_SCHED + 6)
+#define TRC_SCHED_SHUTDOWN (TRC_SCHED + 7)
+#define TRC_SCHED_CTL (TRC_SCHED + 8)
+#define TRC_SCHED_ADJDOM (TRC_SCHED + 9)
+#define TRC_SCHED_SWITCH (TRC_SCHED + 10)
+#define TRC_SCHED_S_TIMER_FN (TRC_SCHED + 11)
+#define TRC_SCHED_T_TIMER_FN (TRC_SCHED + 12)
+#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED + 13)
+
+#define TRC_VMX_VMEXIT (TRC_VMX + 1)
+#define TRC_VMX_VECTOR (TRC_VMX + 2)
+#define TRC_VMX_INT (TRC_VMX + 3)
+
/* This structure represents a single trace buffer record. */
struct t_rec {
- u64 cycles; /* 64 bit cycle counter timestamp */
- u32 event; /* 32 bit event ID */
- u32 d1, d2, d3, d4, d5; /* event data items */
+ u64 cycles; /* cycle counter timestamp */
+ u32 event; /* event ID */
+ unsigned long data[5]; /* event data items */
};
/*
* field, indexes into an array of struct t_rec's.
*/
struct t_buf {
- unsigned long data; /* pointer to data area. machine address
- * for convenience in user space code */
-
- unsigned long size; /* size of the data area, in t_recs */
- unsigned long head; /* array index of the most recent record */
-
- /* Xen-private elements follow... */
- struct t_rec *head_ptr; /* pointer to the head record */
- struct t_rec *vdata; /* virtual address pointer to data */
+ /* Used by both Xen and user space. */
+ atomic_t rec_idx; /* the next record to save to */
+ unsigned int rec_num; /* number of records in this trace buffer */
+ /* Used by Xen only. */
+ struct t_rec *rec; /* start of records */
+ /* Used by user space only. */
+ unsigned long rec_addr; /* machine address of the start of records */
};
#endif /* __XEN_PUBLIC_TRACE_H__ */
* Author: Mark Williamson, mark.a.williamson@intel.com
* Date: January 2004
*
+ * Copyright (C) 2005 Bin Ren
+ *
* The trace buffer code is designed to allow debugging traces of Xen to be
* generated on UP / SMP machines. Each trace entry is timestamped so that
* it's possible to reconstruct a chronological record of trace events.
#ifdef TRACE_BUFFER
-#include <xen/spinlock.h>
#include <asm/page.h>
#include <xen/types.h>
#include <xen/sched.h>
#include <asm/msr.h>
#include <public/dom0_ops.h>
+extern struct t_buf *t_bufs[];
+extern int tb_init_done;
+extern unsigned long tb_cpu_mask;
+extern u32 tb_event_mask;
+
/* Used to initialise trace buffer functionality */
void init_trace_bufs(void);
/* used to retrieve the physical address of the trace buffers */
-int get_tb_info(dom0_gettbufs_t *st);
+int tb_control(dom0_tbufcontrol_t *tbc);
/**
* trace - Enters a trace tuple into the trace buffer for the current CPU.
* failure, otherwise 0. Failure occurs only if the trace buffers are not yet
* initialised.
*/
-static inline int trace(u32 event, u32 d1, u32 d2, u32 d3, u32 d4, u32 d5)
+static inline int trace(u32 event, unsigned long d1, unsigned long d2,
+ unsigned long d3, unsigned long d4, unsigned long d5)
{
- extern struct t_buf *t_bufs[]; /* global array of pointers to bufs */
- extern int tb_init_done; /* set when buffers are initialised */
- unsigned long flags; /* for saving interrupt flags */
- struct t_buf *buf; /* the buffer we're working on */
- struct t_rec *rec; /* next record to fill out */
-
+ atomic_t old, new, seen;
+ struct t_buf *buf;
+ struct t_rec *rec;
if ( !tb_init_done )
return -1;
+ if ( (tb_event_mask & event) == 0 )
+ return 0;
+
+ if ( (tb_cpu_mask & (1UL << smp_processor_id())) == 0 )
+ return 0;
buf = t_bufs[smp_processor_id()];
- local_irq_save(flags);
+ do
+ {
+ old = buf->rec_idx;
+ _atomic_set(new, (_atomic_read(old) + 1) % buf->rec_num);
+ seen = atomic_compareandswap(old, new, &buf->rec_idx);
+ }
+ while ( unlikely(_atomic_read(seen) != _atomic_read(old)) );
- rec = buf->head_ptr;
+ wmb();
+ rec = &buf->rec[_atomic_read(old)];
rdtscll(rec->cycles);
- rec->event = event;
- rec->d1 = d1;
- rec->d2 = d2;
- rec->d3 = d3;
- rec->d4 = d4;
- rec->d5 = d5;
-
- wmb(); /* above must be visible before reader sees index updated */
-
- buf->head_ptr++;
- buf->head++;
- if ( buf->head_ptr == (buf->vdata + buf->size) )
- buf->head_ptr = buf->vdata;
-
- local_irq_restore(flags);
-
+ rec->event = event;
+ rec->data[0] = d1;
+ rec->data[1] = d2;
+ rec->data[2] = d3;
+ rec->data[3] = d4;
+ rec->data[4] = d5;
+
return 0;
}